Testing the different components of the envelope
Motion Clouds: testing components of the envelope¶
import MotionClouds as mc
# Build the 3-D frequency grids (f_x, f_y, f_t) at the package's default resolution.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
def show_mc(name):
    """Display the renderings of a Motion Cloud inside the notebook.

    Shows the still image ``name + mc.ext`` and the cube view
    ``name + '_cube' + mc.ext`` when they exist on disk, then embeds the
    movie ``name + mc.vext`` as a base64-encoded HTML5 video tag.
    """
    import os
    from IPython.display import display, Image, HTML
    from base64 import b64encode
    if os.path.isfile(name + mc.ext):
        display(Image(filename=name + mc.ext))
    if os.path.isfile(name + '_cube' + mc.ext):
        display(Image(filename=name + '_cube' + mc.ext))
    # Read the movie through a context manager: the original
    # open(...).read() leaked the file handle.
    with open(name + mc.vext, "rb") as video_file:
        video = video_file.read()
    video_encoded = b64encode(video)
    video_tag = '<video controls autoplay="autoplay" loop="loop" width=50% src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
    display(HTML(data=video_tag))
Testing the color¶
name = 'color'
# Pure "colored noise" envelope: no speed or orientation selectivity.
z = mc.envelope_color(fx, fy, ft)
# regenerate only when needed — presumably mc.anim_exist returns True when the
# cached figures are missing; confirm against the MotionClouds API
if mc.anim_exist(mc.figpath + name): mc.figures(z, mc.figpath + name)
show_mc(mc.figpath + name)
# Sweep the spectral exponent alpha: 0 = white, 1 = pink, 2 = red/Brownian
# noise (see http://en.wikipedia.org/wiki/1/f_noise).
for alpha in (0.0, 0.5, 1.0, 1.5, 2.0):
    tag = str(alpha).replace('.', '_')
    name_ = mc.figpath + name + '-alpha-' + tag
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, alpha)
        mc.figures(z, name_)
    show_mc(name_)
# Sweep the time/space scaling parameter ft_0 of the envelope.
for ft_0 in (0.125, 0.25, 0.5, 1., 2., 4.):
    tag = str(ft_0).replace('.', '_')
    name_ = mc.figpath + name + '-ft_0-' + tag
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_)
    show_mc(name_)
# Vary the output contrast using the default rectification method.
for contrast in (0.1, 0.25, 0.5, 0.75, 1.0, 2.0):
    name_ = mc.figpath + name + '-contrast-' + str(contrast).replace('.', '_')
    if mc.anim_exist(name_):
        cloud = mc.random_cloud(mc.envelope_color(fx, fy, ft))
        im = mc.rectif(cloud, contrast)
        mc.anim_save(im, name_, display=False)
    show_mc(name_)
# Same contrast sweep, but rectifying with the 'energy' method instead.
for contrast in (0.1, 0.25, 0.5, 0.75, 1.0, 2.0):
    name_ = mc.figpath + name + '-energy_contrast-' + str(contrast).replace('.', '_')
    if mc.anim_exist(name_):
        cloud = mc.random_cloud(mc.envelope_color(fx, fy, ft))
        im = mc.rectif(cloud, contrast, method='energy')
        mc.anim_save(im, name_, display=False)
    show_mc(name_)
# Different random seeds give different instances of the same texture family.
for seed in range(123456, 123456 + 7):
    name_ = mc.figpath + name + '-seed-' + str(seed)
    if mc.anim_exist(name_):
        im = mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed))
        mc.anim_save(im, name_, display=False)
    show_mc(name_)
# Joint spatial + temporal resolution sweep: cubes of side 2**size.
for size in range(5, 7):
    side = 2 ** size
    N_X, N_Y, N_frame = side, side, side
    fx, fy, ft = mc.get_grids(N_X, N_Y, N_frame)
    ft_0 = N_X / float(N_frame)  # keep the time/space scaling constant
    name_ = mc.figpath + name + '-size-' + str(size).replace('.', '_')
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_)
    show_mc(name_)
# Temporal-resolution-only sweep: keep the default spatial grid, vary N_frame.
for size in range(5, 7):
    N_frame = 2**size
    fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, N_frame)
    # BUG FIX: the original computed ft_0 = N_X/float(N_frame) with the bare
    # N_X left over from the previous cell's loop; the grids here are built
    # with mc.N_X, so the time/space scaling must use mc.N_X as well.
    ft_0 = mc.N_X/float(N_frame)
    name_ = mc.figpath + name + '-size_T-' + str(size).replace('.', '_')
    if mc.anim_exist(name_):
        z = mc.envelope_color(fx, fy, ft, ft_0=ft_0)
        mc.figures(z, name_, do_figs=False)
    show_mc(name_)
#name = 'colorfull'
#N = 256 #512
#fx, fy, ft = mc.get_grids(N, N, N)
#for seed in [123456 + step for step in range(1)]:
# if mc.anim_exist(mc.figpath + name):
# mc.anim_save(mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed)), mc.figpath + name, display=False)
# mc.anim_save(mc.rectif(mc.random_cloud(mc.envelope_color(fx, fy, ft), seed=seed)), mc.figpath + name, display=False, vext='.mat')
Testing the speed¶
Testing competing Motion Clouds with psychopy
Analysis of a discrimination experiment¶
In the psychopy_competition.py script, we implemented an experiment to test whether one could discriminate the dominating motion in a sum of two motion clouds moving in opposite directions.
Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.
The experiment¶
#%pycat psychopy_competing.py
Analysis, version 1¶
In a first version of the experiment, we only stored the results in a numpy file:
!ls data/competing_v1_*npy
import glob
# One scatter over all version-1 sessions: row 1 of `results` is the stimulus
# contrast, row 0 the reported direction.
# NOTE(review): assumes results[0] takes values in {-1, 1} (as the masks below
# at L109/L113 suggest) and results[1] lies in [0, 1] — confirm against
# psychopy_competition.py.
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('data/competing_v1_*npy'):
    results = np.load(fn)
    # Python 2 print statement (this notebook predates Python 3)
    print 'experiment ', fn, ', # trials=', results.shape[1]
    ax.scatter(results[1, :], results[0, :])
_ = ax.axis([0., 1., -1.1, 1.1])
_ = ax.set_xlabel('contrast')
_ = ax.set_ylabel('perceived direction')
alpha = .3  # marker transparency for the scatter plots (not the spectral exponent)
fig = plt.figure(figsize=(12,5))
ax1 = fig.add_subplot(121)  # "detected lower" panel
ax2 = fig.add_subplot(122)  # "detected upper" panel
# per-session arrays: row 0 = contrast folded into [.5, 1], row 1 = correct (0/1)
data = []
for fn in glob.glob('data/competing_v1_*npy'):
    results = np.load(fn)
    data_ = np.empty(results.shape)
    # lower stronger, detected lower = CORRECT is 1
    ax1.scatter(results[1, results[1, :]<.5],
                1.*(results[0, results[1, :]<.5]==-1),
                c='r', alpha=alpha)
    # copying data: contrast (from .5 to 1), correct
    data_[0, results[1, :]<.5] = 1. - results[1, results[1, :]<.5]
    data_[1, results[1, :]<.5] = 1.*(results[0, results[1, :]<.5]==-1)
    # upper stronger, detected lower = INCORRECT is 1
    ax1.scatter(results[1, results[1, :]>.5],
                1.*(results[0, results[1, :]>.5]==-1),
                c='b', alpha=alpha)
    # lower stronger, detected upper = INCORRECT is 1
    ax2.scatter(results[1, results[1, :]<.5],
                1.*(results[0, results[1, :]<.5]==1),
                c='b', alpha=alpha, marker='x')
    # upper stronger, detected upper = CORRECT is 1
    ax2.scatter(results[1, results[1, :]>.5],
                1.*(results[0, results[1, :]>.5]==1),
                c='r', alpha=alpha, marker='x')
    # copying data: contrast (from .5 to 1), correct
    # NOTE(review): trials with results[1] exactly .5 fall only in this branch,
    # while the scatter masks above use strict < and > — confirm intended.
    data_[0, results[1, :]>=.5] = results[1, results[1, :]>=.5]
    data_[1, results[1, :]>=.5] = 1.*(results[0, results[1, :]>=.5]==1)
    data.append(data_)
# shared cosmetics for both panels
for ax in [ax1, ax2]:
    ax.axis([0., 1., -.1, 1.1])
    _ = ax.set_xlabel('contrast')
_ = ax1.set_ylabel('detected lower')
_ = ax1.legend(['lower', 'upper'], loc='right')
_ = ax2.set_ylabel('detected upper')
_ = ax2.legend(['lower', 'upper'], loc='right')
(note the subplots are equivalent up to a flip)
One could not fit psychometric curves for this 2AFC. See for instance https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3110332/
n_hist = 10
# contrast bins over the informative range [.5, 1] (below .5 the roles flip)
C_bins = np.linspace(0.5, 1., n_hist)
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(111)
for data_ in data:
    # NOTE(review): `normed` has since been removed from numpy.histogram in
    # favour of `density`; it is a no-op here anyway since normed=False.
    hist_correct, bins = np.histogram(data_[0, data_[1, : ]==1], bins=C_bins, normed=False)
    hist_total, bins = np.histogram(data_[0, : ], bins=C_bins, normed=False)
    # fraction correct per contrast bin, bars centred on the bin middles
    plt.bar(bins[:-1] + (bins[1]-bins[0])/2, (1.*hist_correct)/(1.*hist_total), width=.95*(bins[1]-bins[0]), alpha=.1)
_ = ax.axis([0.5, 1., 0., 1.1])
_ = ax.set_xlabel('contrast')
let's explore another way:
import pypsignifit as psi
data : an array or a list of lists containing stimulus intensities in the first column, number of correct responses (nAFC) or number of YES-responses in the second column, and number of trials in the third column. Each row should correspond to one experimental block. In addition, the sequence of the rows is taken as the sequence of data acquisition. Alternatively, the relative frequencies of correct responses resp. YES responses can be given.
stimulus_intensities, percent_correct = np.array([]), np.array([])
for data_ in data:
    #print data_.shape
    #stimulus_intensities = np.hstack((stimulus_intensities, data_[0, : ]))
    #percent_correct = np.hstack((percent_correct, data_[1, : ]))
    #print data_[0, : ].min(), data_[0, : ].max(), data_[1, : ].mean()
    hist_correct, bins = np.histogram(data_[0, data_[1, : ]==1], bins=C_bins, normed=False)
    hist_total, bins = np.histogram(data_[0, : ], bins=C_bins, normed=False)
    # bin centres serve as stimulus intensities, with the empirical fraction
    # correct per bin as the response measure
    stimulus_intensities = np.hstack((stimulus_intensities, bins[:-1] + (bins[1]-bins[0])/2))
    percent_correct = np.hstack((percent_correct, (1.*hist_correct)/(1.*hist_total)))
num_observations = len(stimulus_intensities)
print 'num_observations: ', num_observations
# psignifit block format: intensity / fraction correct / trials per block.
# NOTE(review): the third row hard-codes 2 trials per block, which does not
# obviously match hist_total — confirm against the psignifit data spec.
psidata = np.vstack((stimulus_intensities, percent_correct, np.array([2]*num_observations)))
BootstrapInference
# see http://psignifit.sourceforge.net/DESIGN.html
nafc = 2  # two-alternative forced choice task
# constraints on (threshold, slope, lapse rate); the lapse rate gets a Beta(2,20) prior
constraints = ( 'unconstrained', 'unconstrained', 'Beta(2,20)')
B = psi.BootstrapInference ( psidata, priors=constraints, nafc=nafc )
# (threshold, slope, lapse rate)
print B.estimate
How well do these parameters describe the data? The deviance is a measure that describes the goodness of fit for a model, based on the sum of the squares error metric:
# goodness-of-fit deviance and the fitted threshold
print B.deviance, B.getThres()#, B.getCI(1)
_ = psi.psigniplot.plotPMF(B, xlabel_text='contrast', showaxes=True)
B.sample()  # draw the bootstrap samples before asking for goodness-of-fit
psi.GoodnessOfFit(B)
- Top left: displays the given data points and the fitted psychometric function. Thresholds and confidence intervals are plotted at three levels (default: 25%, 50% and 75% ). Mind that the y-axis starts at 0.5 (the guess rate in a 2AFC task), therefore the 50% threshold is located at . signifies the deviance value.
- Bottom left: histogram of bootstrapped deviance values (default = 2000 samples). The 95% confidence limits are indicated by red dotted lines and the actually observed deviance is indicated by the solid red line. If the observed deviance is outside the 95% confidence limits that indicates a bad fit and you will receive a warning message.
- Top middle: deviance residuals are plotted as a function of the predicted correct response rate of the model (x-axis corresponds to y-axis in panel 1). This plot helps you to detect systematic deviations between the model and the data. The dotted line is the best linear fit that relates deviance residuals to the predicted correct response rate. Rpd gives the numerical value of that correlation. Note that the residuals are scaled to account for differences in the variability of a binomially distributed random variable (e.g. maximum variance at p=0.5).
- Bottom middle: histogram of bootstrapped correlation coefficients for the correlation between residuals and performance level (same logic applies as in panel 2). Dotted lines denote 95% intervals of the sampled correlation coefficients, the solid line marks the observed correlation between model prediction and deviance residuals.
- Top right: deviance residuals are plotted as a function of block index i.e. the sequence in which the data were acquired (WARNING: this graph can be properly interpreted only when stimulus intensities were fixed in separate blocks). If the observer was learning, the fitted linear correlation between residuals and block index should be positive.
- Bottom right: histogram of bootstrapped correlation coefficients for the correlation between deviance residuals and block index (same logic applies as in panel 2 and 4).
# Bayesian alternative: priors on (threshold, slope, lapse rate)
priors = ( 'Gauss(0,5)', 'Gamma(1,3)', 'Beta(2,20)' )
mcmc = psi.BayesInference ( psidata, priors=priors, nafc=nafc )
mcmc.estimate
psi.ConvergenceMCMC ( mcmc )  # check that the MCMC chains mixed
psi.GoodnessOfFit ( mcmc )
psi.ParameterPlot ( mcmc )
psi.plotInfluential(mcmc)  # highlight data points that dominate the fit
Testing basic functions of Motion Clouds
Motion Clouds: raw principles¶
def show_video(filename):
    """Embed the movie ``mc.figpath + filename + mc.vext`` in the notebook.

    The file is read once, base64-encoded and wrapped in an HTML5
    ``<video>`` tag for inline display.
    """
    from IPython.display import display, HTML
    from base64 import b64encode
    # Read through a context manager: the original open(...).read()
    # leaked the file handle (and imported Image without using it).
    with open(mc.figpath + filename + mc.vext, "rb") as video_file:
        video = video_file.read()
    video_encoded = b64encode(video)
    video_tag = '<video controls autoplay="autoplay" loop="loop" width=350px src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
    display(HTML(data=video_tag))
Introduction¶
Motion Clouds are synthesized textures which aim at having similar characteristics as natural images but with controlled parameters. There are many ways to achieve these results and this notebook aims at showing that different procedures from different communities (neuroscience, modelling, computer vision, ...) may produce similar results.
Using mixtures of images¶
Using Fourier ("official Motion Clouds")¶
import MotionClouds as mc
# (Re)build the default frequency grids for this section of the notebook.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
Using ARMA processes¶
import scipy.misc
# Standard test image, normalized to zero mean and unit variance.
# NOTE(review): scipy.misc.lena() has been removed from modern scipy —
# use scipy.misc.face() or load an image file when rerunning this notebook.
lena = scipy.misc.lena() * 1.
lena -= lena.mean()
lena /= lena.std()
print lena.shape
imshow(lena, cmap=cm.gray)
def noise(image=None):
    """Return a randomly translated (wrapped) copy of *image*.

    The image is circularly rolled by a random offset along each axis, which
    preserves its pixel statistics. Defaults to the module-level ``lena``.
    The default is resolved lazily: the original ``image=lena`` default was
    evaluated once at definition time, binding the array permanently.
    """
    if image is None:
        image = lena
    for axis in [0, 1]:
        image = np.roll(image, np.random.randint(image.shape[axis]), axis=axis)
    return image
imshow(noise(), cmap=cm.gray)
imshow(noise(), cmap=cm.gray)
Now, we define the ARMA process as an averaging process with a certain time constant \(\tau=30.\) (in frames).
def ARMA(image, tau=30., innovation=None):
    """One step of a leaky temporal average with time constant *tau* (frames).

    Blends the previous frame with a fresh innovation:
    ``(1 - 1/tau) * image + (1/tau) * innovation``. When *innovation* is not
    given, a fresh ``noise()`` frame is drawn (the original behaviour).
    """
    # guard against Python 2 integer division when tau is passed as an int
    tau = float(tau)
    if innovation is None:
        innovation = noise()
    return (1 - 1/tau) * image + 1/tau * innovation
initializing
# initialize the recursion from the test image
image = ARMA(lena)
imshow(image, cmap=cm.gray)
# Iterate the recursion; with tau=30 each batch of 1000 steps is >> tau, so
# the displayed image drifts toward a running average of the noise frames.
for _ in range(1000): image = ARMA(image)
imshow(image, cmap=cm.gray)
for _ in range(1000): image = ARMA(image)
imshow(image, cmap=cm.gray)
for _ in range(1000): image = ARMA(image)
imshow(image, cmap=cm.gray)
N_frame = 1024
# Stack successive ARMA steps into an (H, W, T) movie, each frame depending
# on the previous one.
z = np.zeros((lena.shape[0], lena.shape[1], N_frame))
z[:, :, 0] = image
for i_frame in range(1, N_frame):
    z[:, :, i_frame] = ARMA(z[:, :, i_frame-1])
# rescale from roughly [-1, 1] to [0, 1] before saving — TODO confirm range
mc.anim_save(.5 + .5*z, filename='results/arma')
show_video(filename='arma')
Testing discrimination between Motion Clouds with psychopy
Analysis of a discrimination experiment¶
In the psychopy_discrimination.py script, we implemented an experiment to test whether the overall shape of motion clouds could influence discrimination of speed.
Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.
The experiment¶
#%pycat psychopy_discrimination.py
Analysis - version 1¶
In a first version of the experiment, we only stored the results in a numpy file.
!ls data/discriminating_*
import glob
for fn in glob.glob('data/discriminating_*npy'):
    results = np.load(fn)
    print fn, results.shape
    print('analyzing results')
    # entries are presumably -1/0/+1 per trial, so abs().sum() counts answered
    # trials — confirm against psychopy_discrimination.py
    print '# of trials :', numpy.abs(results).sum(axis=1)
    # maps the mean response from [-1, 1] to a [0, 1] "fraction correct" scale
    print 'average results: ', (results.sum(axis=1)/numpy.abs(results).sum(axis=1)*.5+.5)
%whos
Another solution is to use:
# np.savez bundles several named arrays into one .npz archive
data= 1
np.savez('toto', data=data, results=results)
print np.load('toto.npz')['data']
or directly psychopy.misc:
from psychopy import misc
# round-trip the session dictionary through psychopy's pickle helpers
info = misc.fromFile('data/discriminating.pickle')
alphas = [-1., -.5, 0., 0.5, 1., 1.5, 2.]
fileName = 'data/discriminating_' + info['observer'] + '.pickle'
info['alphas'] = alphas
# NOTE(review): `results` comes from the earlier analysis cell, not this one
info['results'] = results
#numpy.savez(fileName, results=results, alphas=alphas)
misc.toFile(fileName, info)
Analysis - version 2¶
In the second version, we stored more data:
import glob
from psychopy import misc
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('data/discriminating_v2_*pickle'):
    data = misc.fromFile(fn)
    # NOTE(review): `results` here is leftover state from an earlier cell —
    # probably data['results'].shape was intended; confirm before rerunning.
    print fn, results.shape
    print('analyzing results')
    alphas = np.array(data['alphas'])
    print ' alphas = ', alphas
    print '# of trials :', numpy.abs(data['results']).sum(axis=1)
    print 'average results: ', (data['results'].sum(axis=1)/numpy.abs(data['results']).sum(axis=1)*.5+.5)
    width = (alphas.max()-alphas.min())/len(alphas)
    # bar of the per-alpha score (mapped from [-1, 1] to [0, 1]), centred on alpha
    ax.bar(data['alphas'] - width/2, (data['results'].sum(axis=1)/numpy.abs(data['results']).sum(axis=1)*.5+.5), width=width, alpha=.3 )
# for i_trial in xrange(data['results'].shape[1]):
#     ax.scatter(data['alphas'][data['results']
ax.set_xlabel('alpha')
ax.set_ylabel('% correct')
# TODO : correct for the number of trials / make global reading / make a new draw of alphas
# TODO : test f_0